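What follows is a set of fixes against linux-2.6.10: length validation for
the moxa driver's firmware-load ioctls, signedness fixes in sg_scsi_ioctl()
and the random-pool sysctl handler, a new throttle_vm_writeout() helper wired
into page reclaim, an RLIMIT_MEMLOCK check on stack expansion, and a
tightened RST test in TCP connection tracking. The short C programs between
the hunks are illustrative userspace sketches, not kernel code.
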
diff -urNp linux-2.6.10/drivers/char/moxa.c linux-2.6.10-new/drivers/char/moxa.c
--- linux-2.6.10/drivers/char/moxa.c 2005-01-07 10:51:23 -0500
+++ linux-2.6.10-new/drivers/char/moxa.c 2005-01-07 10:51:33 -0500
@@ -1668,6 +1668,8 @@ int MoxaDriverIoctl(unsigned int cmd, un
return -EFAULT;
if(dltmp.cardno < 0 || dltmp.cardno >= MAX_BOARDS)
return -EINVAL;
+ if(dltmp.len < 0 || dltmp.len > sizeof(moxaBuff))
+ return -EINVAL;
switch(cmd)
{
@@ -2822,8 +2824,6 @@ static int moxaload320b(int cardno, unsi
void __iomem *baseAddr;
int i;
- if(len > sizeof(moxaBuff))
- return -EINVAL;
if(copy_from_user(moxaBuff, tmp, len))
return -EFAULT;
baseAddr = moxaBaseAddr[cardno];
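
The two moxa.c hunks move the length check out of moxaload320b() and up into
MoxaDriverIoctl(), so the user-supplied dltmp.len is validated against
sizeof(moxaBuff) once, before any of the load routines dispatched from the
switch reach copy_from_user(). A minimal userspace sketch of the resulting
validate-before-copy pattern follows; the names and the 1024-byte buffer are
hypothetical, not the driver's:

    /* Sketch: reject a user-controlled length before touching memory. */
    #include <stdio.h>
    #include <string.h>

    static char scratch[1024];                /* stand-in for moxaBuff */

    static int load_user_block(const char *src, long len)
    {
            if (len < 0 || (size_t)len > sizeof(scratch))
                    return -1;                /* as the new ioctl check does */
            memcpy(scratch, src, (size_t)len);
            return 0;
    }

    int main(void)
    {
            char payload[16] = "firmware bytes";

            printf("ok=%d\n", load_user_block(payload, sizeof(payload)));
            printf("rejected=%d\n", load_user_block(payload, -1));
            return 0;
    }
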
diff -urNp linux-2.6.10/drivers/block/scsi_ioctl.c linux-2.6.10-new/drivers/block/scsi_ioctl.c
--- linux-2.6.10/drivers/block/scsi_ioctl.c 2005-01-07 10:51:24 -0500
+++ linux-2.6.10-new/drivers/block/scsi_ioctl.c 2005-01-07 10:51:33 -0500
@@ -339,7 +339,8 @@ static int sg_scsi_ioctl(struct file *fi
struct gendisk *bd_disk, Scsi_Ioctl_Command __user *sic)
{
struct request *rq;
- int err, in_len, out_len, bytes, opcode, cmdlen;
+ unsigned int in_len, out_len, bytes, opcode, cmdlen;
+ int err;
char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
/*
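
The sg_scsi_ioctl() hunk turns the length variables taken from userspace into
unsigned ints, leaving only err signed. With a signed type, a negative length
slips past any "len > limit" upper-bound test and only later becomes an
enormous byte count when converted for copying. A self-contained illustration
of that pitfall (the 4096 limit is made up):

    #include <stdio.h>

    int main(void)
    {
            int slen = -1;                    /* length as formerly declared */
            unsigned int ulen = (unsigned int)slen;

            if (slen > 4096)                  /* never true for negatives */
                    printf("signed: rejected\n");
            else
                    printf("signed: accepted, becomes %u bytes\n", ulen);

            if (ulen > 4096u)                 /* the same bits are caught */
                    printf("unsigned: rejected\n");
            return 0;
    }

The poolsize_strategy() hunk in random.c below makes the same kind of change,
int to size_t, for the sysctl length.
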
diff -urNp linux-2.6.10/include/linux/writeback.h linux-2.6.10-new/include/linux/writeback.h
--- linux-2.6.10/include/linux/writeback.h 2005-01-07 10:51:22 -0500
+++ linux-2.6.10-new/include/linux/writeback.h 2005-01-07 10:51:33 -0500
@@ -86,6 +86,7 @@ static inline void wait_on_inode(struct
int wakeup_bdflush(long nr_pages);
void laptop_io_completion(void);
void laptop_sync_completion(void);
+void throttle_vm_writeout(void);
/* These are exported to sysctl. */
extern int dirty_background_ratio;
diff -urNp linux-2.6.10/drivers/char/random.c linux-2.6.10-new/drivers/char/random.c
--- linux-2.6.10/drivers/char/random.c 2005-01-07 10:51:23 -0500
+++ linux-2.6.10-new/drivers/char/random.c 2005-01-07 10:51:33 -0500
@@ -1912,7 +1912,7 @@ static int poolsize_strategy(ctl_table *
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen, void **context)
{
- int len;
+ size_t len;
sysctl_poolsize = random_state->poolinfo.POOLBYTES;
diff -urNp linux-2.6.10/mm/mmap.c linux-2.6.10-new/mm/mmap.c
--- linux-2.6.10/mm/mmap.c 2004-12-24 22:35:00.000000000 +0100
+++ linux-2.6.10-new/mm/mmap.c 2004-12-27 16:37:47.000000000 +0100
@@ -1360,6 +1360,13 @@ int expand_stack(struct vm_area_struct *
vm_unacct_memory(grow);
return -ENOMEM;
}
+ if ((vma->vm_flags & VM_LOCKED) && !capable(CAP_IPC_LOCK) &&
+ ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) >
+ current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur) {
+ anon_vma_unlock(vma);
+ vm_unacct_memory(grow);
+ return -ENOMEM;
+ }
vma->vm_end = address;
vma->vm_mm->total_vm += grow;
if (vma->vm_flags & VM_LOCKED)
@@ -1422,6 +1429,13 @@ int expand_stack(struct vm_area_struct *
vm_unacct_memory(grow);
return -ENOMEM;
}
+ if ((vma->vm_flags & VM_LOCKED) && !capable(CAP_IPC_LOCK) &&
+ ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) >
+ current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur) {
+ anon_vma_unlock(vma);
+ vm_unacct_memory(grow);
+ return -ENOMEM;
+ }
vma->vm_start = address;
vma->vm_pgoff -= grow;
vma->vm_mm->total_vm += grow;
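
The two expand_stack() hunks (one per growth direction) add the check the
stack-growth path was missing: if the VMA is VM_LOCKED and the caller lacks
CAP_IPC_LOCK, growth that would push the mm's locked_vm past RLIMIT_MEMLOCK
now fails with ENOMEM instead of silently exceeding the limit. locked_vm is
counted in pages, hence the << PAGE_SHIFT before comparing against the
byte-denominated rlimit. A hedged userspace restatement of that comparison,
with made-up page counts:

    #include <stdio.h>
    #include <sys/resource.h>
    #include <unistd.h>

    int main(void)
    {
            struct rlimit rl;
            unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
            unsigned long locked_pages = 8, grow_pages = 4; /* hypothetical */

            if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0)
                    return 1;

            /* pages -> bytes, then compare against the limit, as the
             * patch does with (locked_vm + grow) << PAGE_SHIFT */
            if ((locked_pages + grow_pages) * page_size > rl.rlim_cur)
                    printf("growth would exceed RLIMIT_MEMLOCK -> ENOMEM\n");
            else
                    printf("growth allowed under a %llu-byte limit\n",
                           (unsigned long long)rl.rlim_cur);
            return 0;
    }
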
diff -urNp linux-2.6.10/mm/page-writeback.c linux-2.6.10-new/mm/page-writeback.c
--- linux-2.6.10/mm/page-writeback.c 2005-01-07 10:51:24 -0500
+++ linux-2.6.10-new/mm/page-writeback.c 2005-01-07 10:51:33 -0500
@@ -276,6 +276,28 @@ void balance_dirty_pages_ratelimited(str
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
+void throttle_vm_writeout(void)
+{
+ struct writeback_state wbs;
+ long background_thresh;
+ long dirty_thresh;
+
+ for ( ; ; ) {
+ get_dirty_limits(&wbs, &background_thresh, &dirty_thresh);
+
+ /*
+ * Boost the allowable dirty threshold a bit for page
+ * allocators so they don't get DoS'ed by heavy writers
+ */
+ dirty_thresh += dirty_thresh / 10; /* wheeee... */
+
+ if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
+ break;
+ blk_congestion_wait(WRITE, HZ/10);
+ }
+}
+
+
/*
* writeback at least _min_pages, and keep writing until the amount of dirty
* memory is less than the background threshold, or until we're all clean.
diff -urNp linux-2.6.10/mm/vmscan.c linux-2.6.10-new/mm/vmscan.c
--- linux-2.6.10/mm/vmscan.c 2005-01-07 10:51:24 -0500
+++ linux-2.6.10-new/mm/vmscan.c 2005-01-07 10:51:33 -0500
@@ -369,14 +369,14 @@ static int shrink_list(struct list_head
BUG_ON(PageActive(page));
- if (PageWriteback(page))
- goto keep_locked;
-
sc->nr_scanned++;
/* Double the slab pressure for mapped and swapcache pages */
if (page_mapped(page) || PageSwapCache(page))
sc->nr_scanned++;
+ if (PageWriteback(page))
+ goto keep_locked;
+
referenced = page_referenced(page, 1, sc->priority <= 0);
/* In active use or really unfreeable? Activate it. */
if (referenced && page_mapping_inuse(page))
@@ -825,6 +825,8 @@ shrink_zone(struct zone *zone, struct sc
break;
}
}
+
+ throttle_vm_writeout();
}
/*
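
Taken together, the page-writeback.c and vmscan.c hunks make direct reclaim
self-limiting: shrink_zone() now ends by calling throttle_vm_writeout(),
which sleeps in blk_congestion_wait() until the pages under writeback (plus
NFS unstable pages) drop below the dirty threshold, boosted by 10% so page
allocators are not starved by heavy writers. The shrink_list() hunk defers
the PageWriteback test until after the scan counters are bumped, so pages
stuck in writeback still count toward nr_scanned and the doubled slab
pressure.
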
diff -urNp linux-2.6.10/net/ipv4/netfilter/ip_conntrack_proto_tcp.c linux-2.6.10-new/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
--- linux-2.6.10/net/ipv4/netfilter/ip_conntrack_proto_tcp.c 2005-01-07 10:51:24 -0500
+++ linux-2.6.10-new/net/ipv4/netfilter/ip_conntrack_proto_tcp.c 2005-01-07 10:51:33 -0500
@@ -906,7 +906,8 @@ static int tcp_packet(struct ip_conntrac
if (index == TCP_RST_SET
&& ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)
&& conntrack->proto.tcp.last_index <= TCP_SYNACK_SET)
- || conntrack->proto.tcp.last_index == TCP_ACK_SET)
+ || (!test_bit(IPS_ASSURED_BIT, &conntrack->status)
+ && conntrack->proto.tcp.last_index == TCP_ACK_SET))
&& after(ntohl(th->ack_seq),
conntrack->proto.tcp.last_seq)) {
/* Ignore RST closing down invalid SYN or ACK
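
The conntrack hunk narrows when an RST may close down a connection on the
strength of an earlier ACK: that path is now honoured only while the
connection is still unassured, so an established (assured) connection can no
longer be torn down by a forged RST replaying a previously seen ACK. A hedged
sketch of the revised predicate, pulled out into a standalone function; the
struct and enum values are illustrative stand-ins, and the
after(ntohl(th->ack_seq), last_seq) sequence test is omitted:

    #include <stdbool.h>
    #include <stdio.h>

    enum { TCP_SYNACK_SET = 1, TCP_ACK_SET = 3, TCP_RST_SET = 4 };

    struct ct_state {
            bool seen_reply;                  /* IPS_SEEN_REPLY_BIT */
            bool assured;                     /* IPS_ASSURED_BIT */
            int  last_index;
    };

    /* An RST may close an invalid handshake if it answers our SYN/SYN-ACK,
     * or answers a stray ACK on a connection that is not yet assured. */
    static bool rst_may_close(const struct ct_state *ct, int index)
    {
            return index == TCP_RST_SET &&
                   ((ct->seen_reply && ct->last_index <= TCP_SYNACK_SET) ||
                    (!ct->assured && ct->last_index == TCP_ACK_SET));
    }

    int main(void)
    {
            struct ct_state assured = { true, true,  TCP_ACK_SET };
            struct ct_state fresh   = { true, false, TCP_ACK_SET };

            printf("forged RST vs assured: %d\n",
                   rst_may_close(&assured, TCP_RST_SET)); /* 0, now ignored */
            printf("RST vs unassured:      %d\n",
                   rst_may_close(&fresh, TCP_RST_SET));   /* 1 */
            return 0;
    }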