 
 /* Enlarge the array size in order to fully show blkback name. */
 #define BLKBACK_NAME_LEN (20)
+#define RINGREF_NAME_LEN (20)
 
 struct backend_info {
         struct xenbus_device *dev;
@@ -156,16 +157,16 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
         return blkif;
 }
 
-static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t gref,
-                         unsigned int evtchn)
+static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
+                         unsigned int nr_grefs, unsigned int evtchn)
 {
         int err;
 
         /* Already connected through? */
         if (blkif->irq)
                 return 0;
 
-        err = xenbus_map_ring_valloc(blkif->be->dev, &gref, 1,
+        err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
                                      &blkif->blk_ring);
         if (err < 0)
                 return err;
@@ -175,21 +176,21 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t gref,
         {
                 struct blkif_sring *sring;
                 sring = (struct blkif_sring *)blkif->blk_ring;
-                BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
+                BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE * nr_grefs);
                 break;
         }
         case BLKIF_PROTOCOL_X86_32:
         {
                 struct blkif_x86_32_sring *sring_x86_32;
                 sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring;
-                BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
+                BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE * nr_grefs);
                 break;
         }
         case BLKIF_PROTOCOL_X86_64:
         {
                 struct blkif_x86_64_sring *sring_x86_64;
                 sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring;
-                BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
+                BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE * nr_grefs);
                 break;
         }
         default:
@@ -270,7 +271,7 @@ static void xen_blkif_free(struct xen_blkif *blkif)
                 i++;
         }
 
-        WARN_ON(i != XEN_BLKIF_REQS_PER_PAGE);
+        WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
 
         kmem_cache_free(xen_blkif_cachep, blkif);
 }
@@ -555,6 +556,11 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
         if (err)
                 goto fail;
 
+        err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order", "%u",
+                            xen_blkif_max_ring_order);
+        if (err)
+                pr_warn("%s write out 'max-ring-page-order' failed\n", __func__);
+
         err = xenbus_switch_state(dev, XenbusStateInitWait);
         if (err)
                 goto fail;
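
For context (this sketch is not part of the patch and is not the actual xen-blkfront code): the "max-ring-page-order" key written above is the backend's half of the ring-size negotiation. A minimal frontend-side counterpart might look like the following, assuming the grant references for the shared ring pages are already held in ring_ref[]; publish_ring() is an invented helper name.

/*
 * Illustrative sketch only: consume "max-ring-page-order" and publish the
 * xenstore keys that connect_ring() below parses.  Assumes ring_ref[] has
 * already been filled with grant references for the shared ring pages.
 */
#include <linux/kernel.h>
#include <xen/xenbus.h>

static int publish_ring(struct xenbus_device *dev, grant_ref_t *ring_ref,
                        unsigned int order)
{
        struct xenbus_transaction xbt;
        unsigned int max_order = 0, nr_pages = 1U << order, i;
        int err;

        /* The backend advertises the largest ring order it accepts. */
        err = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order",
                           "%u", &max_order);
        if (err != 1)
                max_order = 0;  /* legacy backend: single-page ring only */
        if (order > max_order)
                return -EINVAL;

        err = xenbus_transaction_start(&xbt);
        if (err)
                return err;

        if (max_order == 0) {
                /* Legacy protocol: a single "ring-ref" key, no order key. */
                err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
                                    ring_ref[0]);
        } else {
                /* Multi-page protocol: "ring-page-order" plus "ring-ref%u". */
                err = xenbus_printf(xbt, dev->nodename, "ring-page-order",
                                    "%u", order);
                for (i = 0; !err && i < nr_pages; i++) {
                        char name[20];

                        snprintf(name, sizeof(name), "ring-ref%u", i);
                        err = xenbus_printf(xbt, dev->nodename, name, "%u",
                                            ring_ref[i]);
                }
        }
        if (err) {
                xenbus_transaction_end(xbt, 1);
                return err;
        }
        /* A real driver would retry the transaction on -EAGAIN. */
        return xenbus_transaction_end(xbt, 0);
}

The event-channel key and the actual ring allocation and granting are omitted here; the point is only the key layout that connect_ring() below expects.
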
@@ -818,23 +824,66 @@ static void connect(struct backend_info *be)
 static int connect_ring(struct backend_info *be)
 {
         struct xenbus_device *dev = be->dev;
-        unsigned long ring_ref;
-        unsigned int evtchn;
+        unsigned int ring_ref[XENBUS_MAX_RING_PAGES];
+        unsigned int evtchn, nr_grefs, ring_page_order;
         unsigned int pers_grants;
         char protocol[64] = "";
         struct pending_req *req, *n;
         int err, i, j;
 
         pr_debug("%s %s\n", __func__, dev->otherend);
 
-        err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
-                            &ring_ref, "event-channel", "%u", &evtchn, NULL);
-        if (err) {
-                xenbus_dev_fatal(dev, err,
-                                 "reading %s/ring-ref and event-channel",
+        err = xenbus_scanf(XBT_NIL, dev->otherend, "event-channel", "%u",
+                           &evtchn);
+        if (err != 1) {
+                err = -EINVAL;
+                xenbus_dev_fatal(dev, err, "reading %s/event-channel",
                                  dev->otherend);
                 return err;
         }
+        pr_info("event-channel %u\n", evtchn);
+
+        err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
+                           &ring_page_order);
+        if (err != 1) {
+                err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref",
+                                   "%u", &ring_ref[0]);
+                if (err != 1) {
+                        err = -EINVAL;
+                        xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
+                                         dev->otherend);
+                        return err;
+                }
+                nr_grefs = 1;
+                pr_info("%s:using single page: ring-ref %d\n", dev->otherend,
+                        ring_ref[0]);
+        } else {
+                unsigned int i;
+
+                if (ring_page_order > xen_blkif_max_ring_order) {
+                        err = -EINVAL;
+                        xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
+                                         dev->otherend, ring_page_order,
+                                         xen_blkif_max_ring_order);
+                        return err;
+                }
+
+                nr_grefs = 1 << ring_page_order;
+                for (i = 0; i < nr_grefs; i++) {
+                        char ring_ref_name[RINGREF_NAME_LEN];
+
+                        snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
+                        err = xenbus_scanf(XBT_NIL, dev->otherend, ring_ref_name,
+                                           "%u", &ring_ref[i]);
+                        if (err != 1) {
+                                err = -EINVAL;
+                                xenbus_dev_fatal(dev, err, "reading %s/%s",
+                                                 dev->otherend, ring_ref_name);
+                                return err;
+                        }
+                        pr_info("ring-ref%u: %u\n", i, ring_ref[i]);
+                }
+        }
 
         be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
         err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
@@ -859,12 +908,13 @@ static int connect_ring(struct backend_info *be)
 
         be->blkif->vbd.feature_gnt_persistent = pers_grants;
         be->blkif->vbd.overflow_max_grants = 0;
+        be->blkif->nr_ring_pages = nr_grefs;
 
-        pr_info("ring-ref %ld, event-channel %d, protocol %d (%s) %s\n",
-                ring_ref, evtchn, be->blkif->blk_protocol, protocol,
+        pr_info("ring-pages:%d, event-channel %d, protocol %d (%s) %s\n",
+                nr_grefs, evtchn, be->blkif->blk_protocol, protocol,
                 pers_grants ? "persistent grants" : "");
 
-        for (i = 0; i < XEN_BLKIF_REQS_PER_PAGE; i++) {
+        for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
                 req = kzalloc(sizeof(*req), GFP_KERNEL);
                 if (!req)
                         goto fail;
@@ -883,10 +933,9 @@ static int connect_ring(struct backend_info *be)
         }
 
         /* Map the shared frame, irq etc. */
-        err = xen_blkif_map(be->blkif, ring_ref, evtchn);
+        err = xen_blkif_map(be->blkif, ring_ref, nr_grefs, evtchn);
         if (err) {
-                xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
-                                 ring_ref, evtchn);
+                xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
                 return err;
         }
 
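
Also for context (not part of the patch): once a frontend has connected with, say, an order-2 ring, the negotiated layout can be inspected from dom0 with the standard xenstore tools. The path and values below are illustrative only:

# xenstore-ls /local/domain/<frontend-domid>/device/vbd/<devid>
ring-page-order = "2"
ring-ref0 = "..."
ring-ref1 = "..."
ring-ref2 = "..."
ring-ref3 = "..."
event-channel = "..."
protocol = "x86_64-abi"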