@@ -191,21 +191,21 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
 bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
 {
 	loff_t pos = folio_pos(folio);
+	size_t size = folio_size(folio);
 	unsigned int offset, length;
 	struct page *split_at, *split_at2;
 
 	if (pos < start)
 		offset = start - pos;
 	else
 		offset = 0;
-	length = folio_size(folio);
-	if (pos + length <= (u64)end)
-		length = length - offset;
+	if (pos + size <= (u64)end)
+		length = size - offset;
 	else
 		length = end + 1 - pos - offset;
 
 	folio_wait_writeback(folio);
-	if (length == folio_size(folio)) {
+	if (length == size) {
 		truncate_inode_folio(folio->mapping, folio);
 		return true;
 	}
@@ -224,16 +224,20 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
 		return true;
 
 	split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
-	split_at2 = folio_page(folio,
-			PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
-
 	if (!try_folio_split(folio, split_at, NULL)) {
 		/*
 		 * try to split at offset + length to make sure folios within
 		 * the range can be dropped, especially to avoid memory waste
 		 * for shmem truncate
 		 */
-		struct folio *folio2 = page_folio(split_at2);
+		struct folio *folio2;
+
+		if (offset + length == size)
+			goto no_split;
+
+		split_at2 = folio_page(folio,
+				PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
+		folio2 = page_folio(split_at2);
 
 		if (!folio_try_get(folio2))
 			goto no_split;
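
The patch caches folio_size() in a local `size`, defers computing `split_at2` until it is actually needed inside the try_folio_split() failure path, and skips that second split entirely when offset + length == size, i.e. when the truncated range already runs to the end of the folio. For illustration only, below is a minimal standalone sketch (not kernel code) of the offset/length arithmetic the patched function performs; the helper name partial_range() and the use of plain parameters in place of folio_pos()/folio_size() are assumptions made for this example.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/*
 * Hypothetical standalone helper mirroring the range computation in
 * truncate_inode_partial_folio(): pos/size describe the folio, and
 * [start, end] is the inclusive byte range being truncated.
 */
static void partial_range(uint64_t pos, size_t size,
			  uint64_t start, uint64_t end,
			  unsigned int *offset, unsigned int *length)
{
	/* offset of the truncation start within this folio */
	*offset = (pos < start) ? (unsigned int)(start - pos) : 0;

	/* range either runs to the end of the folio or stops at 'end' (inclusive) */
	if (pos + size <= end)
		*length = (unsigned int)(size - *offset);
	else
		*length = (unsigned int)(end + 1 - pos - *offset);
}

int main(void)
{
	unsigned int offset, length;

	/* e.g. a 64KiB folio at file offset 0, truncating bytes [4096, 12287] */
	partial_range(0, 65536, 4096, 12287, &offset, &length);
	printf("offset=%u length=%u\n", offset, length);	/* offset=4096 length=8192 */
	return 0;
}

In this sketch, offset + length == size would correspond to the new early exit in the diff: the partial range reaches the folio's end, so there is nothing left past it worth splitting off.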