@@ -252,51 +252,84 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
252252}
253253#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
254254
255- static int __meminit __add_section (int nid , unsigned long phys_start_pfn ,
256- struct vmem_altmap * altmap )
255+ static int __meminit __add_section (int nid , unsigned long pfn ,
256+ unsigned long nr_pages , struct vmem_altmap * altmap )
257257{
258258 int ret ;
259259
260- if (pfn_valid (phys_start_pfn ))
260+ if (pfn_valid (pfn ))
261261 return - EEXIST ;
262262
263- ret = sparse_add_one_section (nid , phys_start_pfn , altmap );
263+ ret = sparse_add_section (nid , pfn , nr_pages , altmap );
264264 return ret < 0 ? ret : 0 ;
265265}
266266
267+ static int check_pfn_span (unsigned long pfn , unsigned long nr_pages ,
268+ const char * reason )
269+ {
270+ /*
271+ * Disallow all operations smaller than a sub-section and only
272+ * allow operations smaller than a section for
273+ * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
274+ * enforces a larger memory_block_size_bytes() granularity for
275+ * memory that will be marked online, so this check should only
276+ * fire for direct arch_{add,remove}_memory() users outside of
277+ * add_memory_resource().
278+ */
279+ unsigned long min_align ;
280+
281+ if (IS_ENABLED (CONFIG_SPARSEMEM_VMEMMAP ))
282+ min_align = PAGES_PER_SUBSECTION ;
283+ else
284+ min_align = PAGES_PER_SECTION ;
285+ if (!IS_ALIGNED (pfn , min_align )
286+ || !IS_ALIGNED (nr_pages , min_align )) {
287+ WARN (1 , "Misaligned __%s_pages start: %#lx end: #%lx\n" ,
288+ reason , pfn , pfn + nr_pages - 1 );
289+ return - EINVAL ;
290+ }
291+ return 0 ;
292+ }
293+
267294/*
268295 * Reasonably generic function for adding memory. It is
269296 * expected that archs that support memory hotplug will
270297 * call this function after deciding the zone to which to
271298 * add the new pages.
272299 */
273- int __ref __add_pages (int nid , unsigned long phys_start_pfn ,
274- unsigned long nr_pages , struct mhp_restrictions * restrictions )
300+ int __ref __add_pages (int nid , unsigned long pfn , unsigned long nr_pages ,
301+ struct mhp_restrictions * restrictions )
275302{
276303 unsigned long i ;
277- int err = 0 ;
278- int start_sec , end_sec ;
304+ int start_sec , end_sec , err ;
279305 struct vmem_altmap * altmap = restrictions -> altmap ;
280306
281- /* during initialize mem_map, align hot-added range to section */
282- start_sec = pfn_to_section_nr (phys_start_pfn );
283- end_sec = pfn_to_section_nr (phys_start_pfn + nr_pages - 1 );
284-
285307 if (altmap ) {
286308 /*
287309 * Validate altmap is within bounds of the total request
288310 */
289- if (altmap -> base_pfn != phys_start_pfn
311+ if (altmap -> base_pfn != pfn
290312 || vmem_altmap_offset (altmap ) > nr_pages ) {
291313 pr_warn_once ("memory add fail, invalid altmap\n" );
292- err = - EINVAL ;
293- goto out ;
314+ return - EINVAL ;
294315 }
295316 altmap -> alloc = 0 ;
296317 }
297318
319+ err = check_pfn_span (pfn , nr_pages , "add" );
320+ if (err )
321+ return err ;
322+
323+ start_sec = pfn_to_section_nr (pfn );
324+ end_sec = pfn_to_section_nr (pfn + nr_pages - 1 );
298325 for (i = start_sec ; i <= end_sec ; i ++ ) {
299- err = __add_section (nid , section_nr_to_pfn (i ), altmap );
326+ unsigned long pfns ;
327+
328+ pfns = min (nr_pages , PAGES_PER_SECTION
329+ - (pfn & ~PAGE_SECTION_MASK ));
330+ err = __add_section (nid , pfn , pfns , altmap );
331+ pfn += pfns ;
332+ nr_pages -= pfns ;
300333
301334 /*
302335 * EEXIST is finally dealt with by ioresource collision
@@ -309,7 +342,6 @@ int __ref __add_pages(int nid, unsigned long phys_start_pfn,
309342 cond_resched ();
310343 }
311344 vmemmap_populate_print_last ();
312- out :
313345 return err ;
314346}
315347
@@ -487,10 +519,10 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
487519 pgdat -> node_spanned_pages = 0 ;
488520}
489521
490- static void __remove_zone (struct zone * zone , unsigned long start_pfn )
522+ static void __remove_zone (struct zone * zone , unsigned long start_pfn ,
523+ unsigned long nr_pages )
491524{
492525 struct pglist_data * pgdat = zone -> zone_pgdat ;
493- int nr_pages = PAGES_PER_SECTION ;
494526 unsigned long flags ;
495527
496528 pgdat_resize_lock (zone -> zone_pgdat , & flags );
@@ -499,27 +531,23 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn)
499531 pgdat_resize_unlock (zone -> zone_pgdat , & flags );
500532}
501533
502- static void __remove_section (struct zone * zone , struct mem_section * ms ,
503- unsigned long map_offset ,
504- struct vmem_altmap * altmap )
534+ static void __remove_section (struct zone * zone , unsigned long pfn ,
535+ unsigned long nr_pages , unsigned long map_offset ,
536+ struct vmem_altmap * altmap )
505537{
506- unsigned long start_pfn ;
507- int scn_nr ;
538+ struct mem_section * ms = __nr_to_section (pfn_to_section_nr (pfn ));
508539
509540 if (WARN_ON_ONCE (!valid_section (ms )))
510541 return ;
511542
512- scn_nr = __section_nr (ms );
513- start_pfn = section_nr_to_pfn ((unsigned long )scn_nr );
514- __remove_zone (zone , start_pfn );
515-
516- sparse_remove_one_section (ms , map_offset , altmap );
543+ __remove_zone (zone , pfn , nr_pages );
544+ sparse_remove_one_section (ms , pfn , nr_pages , map_offset , altmap );
517545}
518546
519547/**
520548 * __remove_pages() - remove sections of pages from a zone
521549 * @zone: zone from which pages need to be removed
522- * @phys_start_pfn : starting pageframe (must be aligned to start of a section)
550+ * @pfn : starting pageframe (must be aligned to start of a section)
523551 * @nr_pages: number of pages to remove (must be multiple of section size)
524552 * @altmap: alternative device page map or %NULL if default memmap is used
525553 *
@@ -528,30 +556,30 @@ static void __remove_section(struct zone *zone, struct mem_section *ms,
528556 * sure that pages are marked reserved and zones are adjusted properly by
529557 * calling offline_pages().
530558 */
531- void __remove_pages (struct zone * zone , unsigned long phys_start_pfn ,
559+ void __remove_pages (struct zone * zone , unsigned long pfn ,
532560 unsigned long nr_pages , struct vmem_altmap * altmap )
533561{
534- unsigned long i ;
535562 unsigned long map_offset = 0 ;
536- int sections_to_remove ;
563+ int i , start_sec , end_sec ;
537564
538565 map_offset = vmem_altmap_offset (altmap );
539566
540567 clear_zone_contiguous (zone );
541568
542- /*
543- * We can only remove entire sections
544- */
545- BUG_ON (phys_start_pfn & ~PAGE_SECTION_MASK );
546- BUG_ON (nr_pages % PAGES_PER_SECTION );
569+ if (check_pfn_span (pfn , nr_pages , "remove" ))
570+ return ;
547571
548- sections_to_remove = nr_pages / PAGES_PER_SECTION ;
549- for (i = 0 ; i < sections_to_remove ; i ++ ) {
550- unsigned long pfn = phys_start_pfn + i * PAGES_PER_SECTION ;
572+ start_sec = pfn_to_section_nr (pfn );
573+ end_sec = pfn_to_section_nr (pfn + nr_pages - 1 );
574+ for (i = start_sec ; i <= end_sec ; i ++ ) {
575+ unsigned long pfns ;
551576
552577 cond_resched ();
553- __remove_section (zone , __pfn_to_section (pfn ), map_offset ,
554- altmap );
578+ pfns = min (nr_pages , PAGES_PER_SECTION
579+ - (pfn & ~PAGE_SECTION_MASK ));
580+ __remove_section (zone , pfn , pfns , map_offset , altmap );
581+ pfn += pfns ;
582+ nr_pages -= pfns ;
555583 map_offset = 0 ;
556584 }
557585
0 commit comments