@@ -72,6 +72,10 @@ mapped_kernel_page_is_present (unsigned long address)
7272 return pte_present (pte );
7373}
7474
75+ # define VM_READ_BIT 0
76+ # define VM_WRITE_BIT 1
77+ # define VM_EXEC_BIT 2
78+
7579void __kprobes
7680ia64_do_page_fault (unsigned long address , unsigned long isr , struct pt_regs * regs )
7781{
@@ -81,6 +85,12 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
8185 struct siginfo si ;
8286 unsigned long mask ;
8387 int fault ;
88+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE ;
89+
90+ mask = ((((isr >> IA64_ISR_X_BIT ) & 1UL ) << VM_EXEC_BIT )
91+ | (((isr >> IA64_ISR_W_BIT ) & 1UL ) << VM_WRITE_BIT ));
92+
93+ flags |= ((mask & VM_WRITE ) ? FAULT_FLAG_WRITE : 0 );
8494
8595 /* mmap_sem is performance critical.... */
8696 prefetchw (& mm -> mmap_sem );
@@ -109,6 +119,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
109119 if (notify_page_fault (regs , TRAP_BRKPT ))
110120 return ;
111121
122+ retry :
112123 down_read (& mm -> mmap_sem );
113124
114125 vma = find_vma_prev (mm , address , & prev_vma );
@@ -130,10 +141,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
130141
131142 /* OK, we've got a good vm_area for this memory area. Check the access permissions: */
132143
133- # define VM_READ_BIT 0
134- # define VM_WRITE_BIT 1
135- # define VM_EXEC_BIT 2
136-
137144# if (((1 << VM_READ_BIT ) != VM_READ || (1 << VM_WRITE_BIT ) != VM_WRITE ) \
138145 || (1 << VM_EXEC_BIT ) != VM_EXEC )
139146# error File is out of sync with <linux/mm.h>. Please update.
@@ -142,9 +149,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
142149 if (((isr >> IA64_ISR_R_BIT ) & 1UL ) && (!(vma -> vm_flags & (VM_READ | VM_WRITE ))))
143150 goto bad_area ;
144151
145- mask = ( (((isr >> IA64_ISR_X_BIT ) & 1UL ) << VM_EXEC_BIT )
146- | (((isr >> IA64_ISR_W_BIT ) & 1UL ) << VM_WRITE_BIT ));
147-
148152 if ((vma -> vm_flags & mask ) != mask )
149153 goto bad_area ;
150154
@@ -153,7 +157,11 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
153157 * sure we exit gracefully rather than endlessly redo the
154158 * fault.
155159 */
156- fault = handle_mm_fault (mm , vma , address , (mask & VM_WRITE ) ? FAULT_FLAG_WRITE : 0 );
160+ fault = handle_mm_fault (mm , vma , address , flags );
161+
162+ if ((fault & VM_FAULT_RETRY ) && fatal_signal_pending (current ))
163+ return ;
164+
157165 if (unlikely (fault & VM_FAULT_ERROR )) {
158166 /*
159167 * We ran out of memory, or some other thing happened
@@ -168,10 +176,24 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
168176 }
169177 BUG ();
170178 }
171- if (fault & VM_FAULT_MAJOR )
172- current -> maj_flt ++ ;
173- else
174- current -> min_flt ++ ;
179+
180+ if (flags & FAULT_FLAG_ALLOW_RETRY ) {
181+ if (fault & VM_FAULT_MAJOR )
182+ current -> maj_flt ++ ;
183+ else
184+ current -> min_flt ++ ;
185+ if (fault & VM_FAULT_RETRY ) {
186+ flags &= ~FAULT_FLAG_ALLOW_RETRY ;
187+
188+ /* No need to up_read(&mm->mmap_sem) as we would
189+ * have already released it in __lock_page_or_retry
190+ * in mm/filemap.c.
191+ */
192+
193+ goto retry ;
194+ }
195+ }
196+
175197 up_read (& mm -> mmap_sem );
176198 return ;
177199